var runtime.work
226 uses
runtime (current package)
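Before the per-file references, here is a rough sketch of the struct's shape as it can be reconstructed purely from the uses listed below. The real declaration is at mgc.go#L281 and uses runtime-internal types (lfstack, mutex, note, mSpanList), so any type the excerpts don't pin down is an assumption.

```go
// A sketch, not the runtime's declaration: fields are inferred from the
// references in this listing, and simplified stand-in types are used
// where the excerpts don't show the real ones.
package gcsketch

type workSketch struct {
	// Work buffer lists (mgcwork.go); lfstack heads in the runtime,
	// where a value of 0 means "no buffers queued".
	full, empty uint64
	// wbufSpans struct{ lock mutex; free, busy mSpanList } backs the buffers.

	bytesMarked     uint64 // bytes marked this cycle (atomic.Xadd64)
	initialHeapLive uint64 // heapLive snapshot at cycle start

	// Root-marking state (mgcmark.go).
	markrootNext, markrootJobs                        uint32 // next job / job count
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots    int    // per-kind root counts
	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32 // job-index bases
	// stackRoots []*g // goroutine snapshot from allGsSnapshot

	// Worker coordination (mgc.go).
	nproc, nwait            uint32 // workers total / currently idle
	startSema, markDoneSema uint32 // serialize gcStart / gcMarkDone
	cycles                  uint32 // completed GC cycle count
	userForced              bool   // cycle triggered by runtime.GC()?
	mode                    int    // gcMode in the runtime

	// Per-cycle timing and sizing, printed by the gctrace path.
	stwprocs, maxprocs                         int32
	tstart, tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime stamps
	pauseStart, pauseNS, totaltime             int64
	heap0, heap1, heap2, heapGoal              uint64
	// sweepWaiters / assistQueue: lock-protected lists of parked goroutines
	// bgMarkReady note // background mark worker startup signal
}
```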
mgc.go#L163: work.startSema = 1
mgc.go#L164: work.markDoneSema = 1
mgc.go#L165: lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
mgc.go#L166: lockInit(&work.assistQueue.lock, lockRankAssistQueue)
mgc.go#L167: lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
mgc.go#L281: var work struct {
mgc.go#L436: n := atomic.Load(&work.cycles)
mgc.go#L451: for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) {
mgc.go#L467: for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
mgc.go#L475: cycle := atomic.Load(&work.cycles)
mgc.go#L487: lock(&work.sweepWaiters.lock)
mgc.go#L488: nMarks := atomic.Load(&work.cycles)
mgc.go#L495: unlock(&work.sweepWaiters.lock)
mgc.go#L501: work.sweepWaiters.list.push(getg())
mgc.go#L502: goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
mgc.go#L564: return int32(t.n-work.cycles) > 0
mgc.go#L604: semacquire(&work.startSema)
mgc.go#L607: semrelease(&work.startSema)
mgc.go#L612: work.userForced = trigger.kind == gcTriggerCycle
mgc.go#L645: work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
mgc.go#L646: if work.stwprocs > ncpu {
mgc.go#L649: work.stwprocs = ncpu
mgc.go#L651: work.heap0 = atomic.Load64(&gcController.heapLive)
mgc.go#L652: work.pauseNS = 0
mgc.go#L653: work.mode = mode
mgc.go#L656: work.tSweepTerm = now
mgc.go#L657: work.pauseStart = now
mgc.go#L671: work.cycles++
mgc.go#L676: work.heapGoal = gcController.heapGoal
mgc.go#L725: work.pauseNS += now - work.pauseStart
mgc.go#L726: work.tMark = now
mgc.go#L727: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L743: semrelease(&work.startSema)
mgc.go#L778: semacquire(&work.markDoneSema)
mgc.go#L787: if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
mgc.go#L788: semrelease(&work.markDoneSema)
mgc.go#L840: work.tMarkTerm = now
mgc.go#L841: work.pauseStart = now
mgc.go#L874: work.pauseNS += now - work.pauseStart
mgc.go#L875: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L892: semrelease(&work.markDoneSema)
mgc.go#L901: nextTriggerRatio := gcController.endCycle(now, int(gomaxprocs), work.userForced)
mgc.go#L913: work.heap1 = gcController.heapLive
mgc.go#L941: work.heap2 = work.bytesMarked
mgc.go#L958: gcSweep(work.mode)
mgc.go#L987: work.pauseNS += now - work.pauseStart
mgc.go#L988: work.tEnd = now
mgc.go#L989: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L992: memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
mgc.go#L994: memstats.pause_total_ns += uint64(work.pauseNS)
mgc.go#L997: sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
mgc.go#L1001: markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
mgc.go#L1003: work.totaltime += cycleCpu
mgc.go#L1007: memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
mgc.go#L1013: if work.userForced {
mgc.go#L1018: lock(&work.sweepWaiters.lock)
mgc.go#L1020: injectglist(&work.sweepWaiters.list)
mgc.go#L1021: unlock(&work.sweepWaiters.lock)
mgc.go#L1073: " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
mgc.go#L1075: prev := work.tSweepTerm
mgc.go#L1076: for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
mgc.go#L1094: work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
mgc.go#L1095: work.heapGoal>>20, " MB goal, ",
mgc.go#L1098: work.maxprocs, " P")
mgc.go#L1099: if work.userForced {
mgc.go#L1133: notetsleepg(&work.bgMarkReady, -1)
mgc.go#L1134: noteclear(&work.bgMarkReady)
mgc.go#L1154: work.nproc = ^uint32(0)
mgc.go#L1155: work.nwait = ^uint32(0)
mgc.go#L1186: notewakeup(&work.bgMarkReady)
mgc.go#L1257: decnwait := atomic.Xadd(&work.nwait, -1)
mgc.go#L1258: if decnwait == work.nproc {
mgc.go#L1259: println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
mgc.go#L1309: incnwait := atomic.Xadd(&work.nwait, +1)
mgc.go#L1310: if incnwait > work.nproc {
mgc.go#L1312: "work.nwait=", incnwait, "work.nproc=", work.nproc)
mgc.go#L1324: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgc.go#L1343: if !work.full.empty() {
mgc.go#L1346: if work.markrootNext < work.markrootJobs {
mgc.go#L1363: work.tstart = startTime
mgc.go#L1366: if work.full != 0 || work.markrootNext < work.markrootJobs {
mgc.go#L1367: print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
mgc.go#L1376: if work.full != 0 {
mgc.go#L1383: work.stackRoots = nil
mgc.go#L1444: gcController.resetLive(work.bytesMarked)
mgc.go#L1532: work.bytesMarked = 0
mgc.go#L1533: work.initialHeapLive = atomic.Load64(&gcController.heapLive)
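The mgc.go references above belong to the cycle driver (gcStart, gcMarkDone, gcMarkTermination): work.pauseStart is stamped when each stop-the-world window opens, work.pauseNS accumulates the window lengths (mgc.go#L725, #L874, #L987), and STW CPU time is charged as work.stwprocs times the phase duration (mgc.go#L997, #L1001). A minimal sketch of that arithmetic, with made-up timestamps and an assumed GOMAXPROCS of 4:

```go
// Illustrates the pause accounting visible above: each STW window adds
// (now - pauseStart) to pauseNS, and STW CPU is stwprocs * window length.
// All timestamps here are invented; the runtime reads nanotime().
package main

import "fmt"

func main() {
	var (
		pauseNS    int64
		pauseStart int64
		stwprocs   int64 = 4 // assumed GOMAXPROCS during STW
	)

	// Sweep-termination STW window (tSweepTerm .. tMark).
	tSweepTerm := int64(1_000_000) // assumed nanotime() at STW start
	pauseStart = tSweepTerm
	tMark := int64(1_250_000) // assumed nanotime() when the world restarts
	pauseNS += tMark - pauseStart

	// Mark-termination STW window (tMarkTerm .. tEnd).
	tMarkTerm := int64(9_000_000)
	pauseStart = tMarkTerm
	tEnd := int64(9_400_000)
	pauseNS += tEnd - pauseStart

	// CPU charged to the two STW phases, as in mgc.go#L997 and #L1001.
	sweepTermCpu := stwprocs * (tMark - tSweepTerm)
	markTermCpu := stwprocs * (tEnd - tMarkTerm)

	fmt.Println("total pause ns:", pauseNS)         // 650000
	fmt.Println("sweep-term CPU ns:", sweepTermCpu) // 1000000
	fmt.Println("mark-term CPU ns:", markTermCpu)   // 1600000
}
```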
mgcmark.go#L66: work.nDataRoots = 0
mgcmark.go#L67: work.nBSSRoots = 0
mgcmark.go#L72: if nDataRoots > work.nDataRoots {
mgcmark.go#L73: work.nDataRoots = nDataRoots
mgcmark.go#L79: if nBSSRoots > work.nBSSRoots {
mgcmark.go#L80: work.nBSSRoots = nBSSRoots
mgcmark.go#L97: work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
mgcmark.go#L105: work.stackRoots = allGsSnapshot()
mgcmark.go#L106: work.nStackRoots = len(work.stackRoots)
mgcmark.go#L108: work.markrootNext = 0
mgcmark.go#L109: work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
mgcmark.go#L112: work.baseData = uint32(fixedRootCount)
mgcmark.go#L113: work.baseBSS = work.baseData + uint32(work.nDataRoots)
mgcmark.go#L114: work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
mgcmark.go#L115: work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
mgcmark.go#L116: work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
mgcmark.go#L122: if work.markrootNext < work.markrootJobs {
mgcmark.go#L123: print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
mgcmark.go#L134: if i >= work.nStackRoots {
mgcmark.go#L168: case work.baseData <= i && i < work.baseBSS:
mgcmark.go#L171: workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
mgcmark.go#L174: case work.baseBSS <= i && i < work.baseSpans:
mgcmark.go#L177: workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
mgcmark.go#L191: case work.baseSpans <= i && i < work.baseStacks:
mgcmark.go#L193: markrootSpans(gcw, int(i-work.baseSpans))
mgcmark.go#L198: if i < work.baseStacks || work.baseEnd <= i {
mgcmark.go#L200: print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
mgcmark.go#L203: gp := work.stackRoots[i-work.baseStacks]
mgcmark.go#L209: gp.waitsince = work.tstart
mgcmark.go#L545: decnwait := atomic.Xadd(&work.nwait, -1)
mgcmark.go#L546: if decnwait == work.nproc {
mgcmark.go#L547: println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
mgcmark.go#L573: incnwait := atomic.Xadd(&work.nwait, +1)
mgcmark.go#L574: if incnwait > work.nproc {
mgcmark.go#L576: "work.nproc=", work.nproc)
mgcmark.go#L580: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgcmark.go#L600: lock(&work.assistQueue.lock)
mgcmark.go#L601: list := work.assistQueue.q.popList()
mgcmark.go#L603: unlock(&work.assistQueue.lock)
mgcmark.go#L611: lock(&work.assistQueue.lock)
mgcmark.go#L616: unlock(&work.assistQueue.lock)
mgcmark.go#L621: oldList := work.assistQueue.q
mgcmark.go#L622: work.assistQueue.q.pushBack(gp)
mgcmark.go#L629: work.assistQueue.q = oldList
mgcmark.go#L633: unlock(&work.assistQueue.lock)
mgcmark.go#L637: goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
mgcmark.go#L652: if work.assistQueue.q.empty() {
mgcmark.go#L664: lock(&work.assistQueue.lock)
mgcmark.go#L665: for !work.assistQueue.q.empty() && scanBytes > 0 {
mgcmark.go#L666: gp := work.assistQueue.q.pop()
mgcmark.go#L688: work.assistQueue.q.pushBack(gp)
mgcmark.go#L699: unlock(&work.assistQueue.lock)
mgcmark.go#L1040: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1043: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1044: if job >= work.markrootJobs {
mgcmark.go#L1062: if work.full == 0 {
mgcmark.go#L1140: if work.full == 0 {
mgcmark.go#L1157: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1158: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1159: if job < work.markrootJobs {
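The mgcmark.go references above show how root-marking work is partitioned: gcMarkRootPrepare lays the job index space out as [fixed | data | BSS | spans | stacks] (mgcmark.go#L112-L116), and workers claim indexes by atomically incrementing work.markrootNext until it reaches work.markrootJobs (mgcmark.go#L1043). A self-contained sketch of that claim-and-dispatch loop, with made-up root counts and an assumed fixed-root count of 2:

```go
// Mirrors the markroot job partitioning seen above. The per-kind root
// counts and fixedRootCount value are illustrative assumptions.
package main

import (
	"fmt"
	"sync/atomic"
)

const fixedRootCount = 2 // assumed count of fixed roots (finalizers etc.)

func main() {
	// Assumed per-kind root counts.
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots := uint32(3), uint32(1), uint32(4), uint32(5)

	// Base index of each root kind, as in mgcmark.go#L112-L116.
	baseData := uint32(fixedRootCount)
	baseBSS := baseData + nDataRoots
	baseSpans := baseBSS + nBSSRoots
	baseStacks := baseSpans + nSpanRoots
	baseEnd := baseStacks + nStackRoots

	var markrootNext uint32
	markrootJobs := baseEnd

	// Claim jobs the way markroot's callers do (mgcmark.go#L1043):
	// the atomic add returns the new value, so subtract 1 for our index.
	for {
		job := atomic.AddUint32(&markrootNext, 1) - 1
		if job >= markrootJobs {
			break
		}
		// Dispatch by half-open range, as in markroot (mgcmark.go#L168-L203).
		switch {
		case job < baseData:
			fmt.Println(job, "-> fixed root")
		case job < baseBSS:
			fmt.Println(job, "-> data segment block", job-baseData)
		case job < baseSpans:
			fmt.Println(job, "-> BSS block", job-baseBSS)
		case job < baseStacks:
			fmt.Println(job, "-> span shard", job-baseSpans)
		default:
			fmt.Println(job, "-> goroutine stack", job-baseStacks)
		}
	}
}
```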
mgcpacer.go#L452: work.initialHeapLive>>20, "->",
mgcpacer.go#L1182: print("runtime: heapGoal=", c.heapGoal, " heapMarked=", c.heapMarked, " gcController.heapLive=", c.heapLive, " initialHeapLive=", work.initialHeapLive, "triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
mgcpacer.go#L1265: gcWaitOnMark(atomic.Load(&work.cycles))
mgcwork.go#L116: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L275: atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
mgcwork.go#L351: if work.empty != 0 {
mgcwork.go#L352: b = (*workbuf)(work.empty.pop())
mgcwork.go#L359: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L364: if work.wbufSpans.free.first != nil {
mgcwork.go#L365: lock(&work.wbufSpans.lock)
mgcwork.go#L366: s = work.wbufSpans.free.first
mgcwork.go#L368: work.wbufSpans.free.remove(s)
mgcwork.go#L369: work.wbufSpans.busy.insert(s)
mgcwork.go#L371: unlock(&work.wbufSpans.lock)
mgcwork.go#L381: lock(&work.wbufSpans.lock)
mgcwork.go#L382: work.wbufSpans.busy.insert(s)
mgcwork.go#L383: unlock(&work.wbufSpans.lock)
mgcwork.go#L406: work.empty.push(&b.node)
mgcwork.go#L415: work.full.push(&b.node)
mgcwork.go#L422: b := (*workbuf)(work.full.pop())
mgcwork.go#L448: lock(&work.wbufSpans.lock)
mgcwork.go#L449: if work.full != 0 {
mgcwork.go#L455: work.empty = 0
mgcwork.go#L456: work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
mgcwork.go#L457: unlock(&work.wbufSpans.lock)
mgcwork.go#L464: lock(&work.wbufSpans.lock)
mgcwork.go#L465: if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
mgcwork.go#L466: unlock(&work.wbufSpans.lock)
mgcwork.go#L472: span := work.wbufSpans.free.first
mgcwork.go#L476: work.wbufSpans.free.remove(span)
mgcwork.go#L480: more := !work.wbufSpans.free.isEmpty()
mgcwork.go#L481: unlock(&work.wbufSpans.lock)
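The mgcwork.go references above show mark work buffers circulating between two global lists: getempty pops from work.empty or allocates (mgcwork.go#L351-L352), putfull parks a filled buffer on work.full (mgcwork.go#L415), trygetfull pops it back for draining (mgcwork.go#L422), and a head value of 0 doubles as the "nothing queued" test (mgcmark.go#L1062, mgcwork.go#L351). A minimal sketch of that two-list discipline, with a mutex-guarded slice standing in for the runtime's lock-free lfstack:

```go
// Sketch of the full/empty work buffer lists. The runtime uses lock-free
// lfstacks and spans from work.wbufSpans; the bufList type, its locking,
// and the buffer contents here are simplified stand-ins.
package main

import (
	"fmt"
	"sync"
)

type workbuf struct {
	obj []uintptr // pointers queued for marking
}

type bufList struct {
	mu   sync.Mutex
	bufs []*workbuf
}

func (l *bufList) push(b *workbuf) {
	l.mu.Lock()
	l.bufs = append(l.bufs, b)
	l.mu.Unlock()
}

// pop returns nil when the list is empty, playing the role of the
// work.full == 0 / work.empty == 0 checks in the excerpts.
func (l *bufList) pop() *workbuf {
	l.mu.Lock()
	defer l.mu.Unlock()
	if len(l.bufs) == 0 {
		return nil
	}
	b := l.bufs[len(l.bufs)-1]
	l.bufs = l.bufs[:len(l.bufs)-1]
	return b
}

var full, empty bufList

// getempty returns a buffer to fill, allocating when the empty list is
// dry, mirroring mgcwork.go#L351-L352.
func getempty() *workbuf {
	if b := empty.pop(); b != nil {
		return b
	}
	return &workbuf{}
}

func main() {
	// Producer: fill a buffer and park it on the full list (#L415).
	b := getempty()
	b.obj = append(b.obj, 0xdeadbeef)
	full.push(b)

	// Consumer: drain a full buffer, then recycle it (#L422, #L406).
	if got := full.pop(); got != nil {
		fmt.Println("marked", len(got.obj), "object(s)")
		got.obj = got.obj[:0]
		empty.push(got)
	}
}
```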